
#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)    ((mm_segment_t) { (s) })


#define KERNEL_DS    MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS        MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()    (KERNEL_DS)
#define get_fs()    (current->addr_limit)
#define set_fs(x)    (current->addr_limit = (x))

#define segment_eq(a,b)    ((a).seg == (b).seg)
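
/*
 * Illustrative sketch (not part of the original header): the usual way
 * kernel code temporarily lifts the address-limit check is to save the
 * current fs value, switch to KERNEL_DS, do the access, and restore the
 * old value.  The names old_fs and do_kernel_access are hypothetical.
 *
 *    mm_segment_t old_fs = get_fs();
 *    set_fs(KERNEL_DS);
 *    do_kernel_access();    // may now pass kernel pointers to the *_user helpers
 *    set_fs(old_fs);
 */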

extern int __verify_write(const void *, unsigned long);

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 */
#define __range_ok(addr,size) ({ \
    unsigned long flag,sum; \
    asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
        :"=&r" (flag), "=r" (sum) \
        :"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
    flag; })
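
/*
 * What the asm above computes, roughly, as a C sketch (using a 64-bit
 * intermediate to make the carry explicit; illustration only):
 *
 *    unsigned long long sum = (unsigned long long)(unsigned long)addr + size;
 *    flag = (sum > current->addr_limit.seg) ? -1 : 0;
 *
 * i.e. flag is zero only when addr + size neither wraps past 4GB nor
 * exceeds the current address limit; hence the "33-bit arithmetic" note.
 */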

#ifdef CONFIG_X86_WP_WORKS_OK

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)

#else

#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
         ((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
         segment_eq(get_fs(),KERNEL_DS) || \
          __verify_write((void *)(addr),(size))))

#endif /* CPU */

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
    return access_ok(type,addr,size) ? 0 : -EFAULT;
}


/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
    unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
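
/*
 * Sketch of how the caller is expected to use this (the page fault
 * handler lives outside this file; the regs->eip field name is assumed
 * from the i386 pt_regs layout and shown only for illustration):
 *
 *    unsigned long fixup = search_exception_table(regs->eip);
 *    if (fixup) {
 *        regs->eip = fixup;    // resume at the out-of-line fixup code
 *        return;
 *    }
 *
 * The faulting mov in .text and its recovery code in .fixup are paired
 * through the __ex_table entries emitted by the macros below.
 */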


/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
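
/*
 * Typical use (a sketch; the variable names are illustrative only):
 *
 *    int value;
 *    if (get_user(value, (int *)user_ptr))
 *        return -EFAULT;
 *    if (put_user(value + 1, (int *)user_ptr))
 *        return -EFAULT;
 *
 * Both macros evaluate to 0 on success and -EFAULT on a faulting access.
 */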

extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);

#define __get_user_x(size,ret,x,ptr) \
    __asm__ __volatile__("call __get_user_" #size \
        :"=a" (ret),"=d" (x) \
        :"0" (ptr))

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)                            \
({    int __ret_gu,__val_gu;                        \
    switch(sizeof (*(ptr))) {                    \
    case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;        \
    case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;        \
    case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;        \
    default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;        \
    }                                \
    (x) = (__typeof__(*(ptr)))__val_gu;                \
    __ret_gu;                            \
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);

extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)                    \
    __asm__ __volatile__("call __put_user_" #size            \
        :"=a" (ret)                        \
        :"0" (ptr),"d" (x)                    \
        :"cx")

#define put_user(x,ptr)                                    \
({    int __ret_pu;                                    \
    switch(sizeof (*(ptr))) {                            \
    case 1:  __put_user_x(1,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break;        \
    case 2:  __put_user_x(2,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break;        \
    case 4:  __put_user_x(4,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break;        \
    default: __put_user_x(X,__ret_pu,x,ptr); break;                    \
    }                                        \
    __ret_pu;                                    \
})

#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __put_user_nocheck(x,ptr,size)            \
({                            \
    long __pu_err;                    \
    __put_user_size((x),(ptr),(size),__pu_err);    \
    __pu_err;                    \
})

#define __put_user_size(x,ptr,size,retval)                \
do {                                    \
    retval = 0;                            \
    switch (size) {                            \
      case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break;    \
      case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break;    \
      case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break;    \
      default: __put_user_bad();                    \
    }                                \
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype)    \
    __asm__ __volatile__(                    \
        "1:    mov"itype" %"rtype"1,%2\n"        \
        "2:\n"                        \
        ".section .fixup,\"ax\"\n"            \
        "3:    movl %3,%0\n"                \
        "    jmp 2b\n"                \
        ".previous\n"                    \
        ".section __ex_table,\"a\"\n"            \
        "    .align 4\n"                \
        "    .long 1b,3b\n"                \
        ".previous"                    \
        : "=r"(err)                    \
        : ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))


#define __get_user_nocheck(x,ptr,size)                \
({                                \
    long __gu_err, __gu_val;                \
    __get_user_size(__gu_val,(ptr),(size),__gu_err);    \
    (x) = (__typeof__(*(ptr)))__gu_val;            \
    __gu_err;                        \
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)                \
do {                                    \
    retval = 0;                            \
    switch (size) {                            \
      case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break;    \
      case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break;    \
      case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break;    \
      default: (x) = __get_user_bad();                \
    }                                \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype)    \
    __asm__ __volatile__(                    \
        "1:    mov"itype" %2,%"rtype"1\n"        \
        "2:\n"                        \
        ".section .fixup,\"ax\"\n"            \
        "3:    movl %3,%0\n"                \
        "    xor"itype" %"rtype"1,%"rtype"1\n"    \
        "    jmp 2b\n"                \
        ".previous\n"                    \
        ".section __ex_table,\"a\"\n"            \
        "    .align 4\n"                \
        "    .long 1b,3b\n"                \
        ".previous"                    \
        : "=r"(err), ltype (x)                \
        : "m"(__m(addr)), "i"(-EFAULT), "0"(err))

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized for
 * the common case of simply returning from the function in which xxx_ret
 * is used.
 */

#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; })
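
/*
 * Example (sketch, names illustrative): inside a system call the _ret
 * forms collapse the usual "if (get_user(...)) return -EFAULT;" pattern
 * into a single line:
 *
 *    get_user_ret(value, (int *)user_ptr, -EFAULT);
 *    put_user_ret(value, (int *)result_ptr, -EFAULT);
 */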


/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
#define __copy_user(to,from,size)                    \
do {                                    \
    int __d0, __d1;                            \
    __asm__ __volatile__(                        \
        "0:    rep; movsl\n"                    \
        "    movl %3,%0\n"                    \
        "1:    rep; movsb\n"                    \
        "2:\n"                            \
        ".section .fixup,\"ax\"\n"                \
        "3:    lea 0(%3,%0,4),%0\n"                \
        "    jmp 2b\n"                    \
        ".previous\n"                        \
        ".section __ex_table,\"a\"\n"                \
        "    .align 4\n"                    \
        "    .long 0b,3b\n"                    \
        "    .long 1b,2b\n"                    \
        ".previous"                        \
        : "=&c"(size), "=&D" (__d0), "=&S" (__d1)        \
        : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from)    \
        : "memory");                        \
} while (0)

#define __copy_user_zeroing(to,from,size)                \
do {                                    \
    int __d0, __d1;                            \
    __asm__ __volatile__(                        \
        "0:    rep; movsl\n"                    \
        "    movl %3,%0\n"                    \
        "1:    rep; movsb\n"                    \
        "2:\n"                            \
        ".section .fixup,\"ax\"\n"                \
        "3:    lea 0(%3,%0,4),%0\n"                \
        "4:    pushl %0\n"                    \
        "    pushl %%eax\n"                    \
        "    xorl %%eax,%%eax\n"                \
        "    rep; stosb\n"                    \
        "    popl %%eax\n"                    \
        "    popl %0\n"                    \
        "    jmp 2b\n"                    \
        ".previous\n"                        \
        ".section __ex_table,\"a\"\n"                \
        "    .align 4\n"                    \
        "    .long 0b,3b\n"                    \
        "    .long 1b,4b\n"                    \
        ".previous"                        \
        : "=&c"(size), "=&D" (__d0), "=&S" (__d1)        \
        : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from)    \
        : "memory");                        \
} while (0)

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
    __copy_user_zeroing(to,from,n);
    return n;
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
    __copy_user(to,from,n);
    return n;
}
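
/*
 * Note on the return value (this applies to the copy_*_user variants
 * below as well): the fixup code above leaves the number of bytes that
 * could NOT be copied in the count register, so these helpers return 0
 * on complete success and the remaining byte count on a fault.  The
 * "_zeroing" variant additionally clears the destination bytes that
 * could not be read from user space.
 */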


/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)            \
do {                                \
    int __d0, __d1;                        \
    switch (size & 3) {                    \
    default:                        \
        __asm__ __volatile__(                \
            "0:    rep; movsl\n"            \
            "1:\n"                    \
            ".section .fixup,\"ax\"\n"        \
            "2:    shl $2,%0\n"            \
            "    jmp 1b\n"            \
            ".previous\n"                \
            ".section __ex_table,\"a\"\n"        \
            "    .align 4\n"            \
            "    .long 0b,2b\n"            \
            ".previous"                \
            : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
            : "1"(from), "2"(to), "0"(size/4)    \
            : "memory");                \
        break;                        \
    case 1:                            \
        __asm__ __volatile__(                \
            "0:    rep; movsl\n"            \
            "1:    movsb\n"            \
            "2:\n"                    \
            ".section .fixup,\"ax\"\n"        \
            "3:    shl $2,%0\n"            \
            "4:    incl %0\n"            \
            "    jmp 2b\n"            \
            ".previous\n"                \
            ".section __ex_table,\"a\"\n"        \
            "    .align 4\n"            \
            "    .long 0b,3b\n"            \
            "    .long 1b,4b\n"            \
            ".previous"                \
            : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
            : "1"(from), "2"(to), "0"(size/4)    \
            : "memory");                \
        break;                        \
    case 2:                            \
        __asm__ __volatile__(                \
            "0:    rep; movsl\n"            \
            "1:    movsw\n"            \
            "2:\n"                    \
            ".section .fixup,\"ax\"\n"        \
            "3:    shl $2,%0\n"            \
            "4:    addl $2,%0\n"            \
            "    jmp 2b\n"            \
            ".previous\n"                \
            ".section __ex_table,\"a\"\n"        \
            "    .align 4\n"            \
            "    .long 0b,3b\n"            \
            "    .long 1b,4b\n"            \
            ".previous"                \
            : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
            : "1"(from), "2"(to), "0"(size/4)    \
            : "memory");                \
        break;                        \
    case 3:                            \
        __asm__ __volatile__(                \
            "0:    rep; movsl\n"            \
            "1:    movsw\n"            \
            "2:    movsb\n"            \
            "3:\n"                    \
            ".section .fixup,\"ax\"\n"        \
            "4:    shl $2,%0\n"            \
            "5:    addl $2,%0\n"            \
            "6:    incl %0\n"            \
            "    jmp 3b\n"            \
            ".previous\n"                \
            ".section __ex_table,\"a\"\n"        \
            "    .align 4\n"            \
            "    .long 0b,4b\n"            \
            "    .long 1b,5b\n"            \
            "    .long 2b,6b\n"            \
            ".previous"                \
            : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
            : "1"(from), "2"(to), "0"(size/4)    \
            : "memory");                \
        break;                        \
    }                            \
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)        \
do {                                \
    int __d0, __d1;                        \
    switch (size & 3) {                    \
    default:                        \
        __asm__ __volatile__(                \
            "0:    rep; movsl\n"            \
            "1:\n"                    \
            ".section .fixup,\"ax\"\n"        \
            "2:    pushl %0\n"            \
            "    pushl %%eax\n"            \
            "    xorl %%eax,%%eax\n"        \
            "    rep; stosl\n"            \
            "    popl %%eax\n"            \
            "    popl %0\n"            \
            "    shl $2,%0\n"            \
            "    jmp 1b\n"            \
            ".previous\n"                \
            ".section __ex_table,\"a\"\n"        \
            "    .align 4\n"            \
            "    .long 0b,2b\n"            \
            ".previous"                \
            : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
            : "1"(from), "2"(to), "0"(size/4)    \
            : "memory");                \
        break;                        \
    case 1:                            \
        __asm__ __volatile__(                \
            "0:    rep; movsl\n"            \
            "1:    movsb\n"            \
            "2:\n"                    \
            ".section .fixup,\"ax\"\n"        \
            "3:    pushl %0\n"            \
            "    pushl %%eax\n"            \
            "    xorl %%eax,%%eax\n"        \
            "    rep; stosl\n"            \
            "    stosb\n"            \
            "    popl %%eax\n"            \
            "    popl %0\n"            \
            "    shl $2,%0\n"            \
            "    incl %0\n"            \
            "    jmp 2b\n"            \
            "4:    pushl %%eax\n"            \
            "    xorl %%eax,%%eax\n"        \
            "    stosb\n"            \
            "    popl %%eax\n"            \
            "    incl %0\n"            \
            "    jmp 2b\n"            \
            ".previous\n"                \
            ".section __ex_table,\"a\"\n"        \
            "    .align 4\n"            \
            "    .long 0b,3b\n"            \
            "    .long 1b,4b\n"            \
            ".previous"                \
            : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
            : "1"(from), "2"(to), "0"(size/4)    \
            : "memory");                \
        break;                        \
    case 2:                            \
        __asm__ __volatile__(                \
            "0:    rep; movsl\n"            \
            "1:    movsw\n"            \
            "2:\n"                    \
            ".section .fixup,\"ax\"\n"        \
            "3:    pushl %0\n"            \
            "    pushl %%eax\n"            \
            "    xorl %%eax,%%eax\n"        \
            "    rep; stosl\n"            \
            "    stosw\n"            \
            "    popl %%eax\n"            \
            "    popl %0\n"            \
            "    shl $2,%0\n"            \
            "    addl $2,%0\n"            \
            "    jmp 2b\n"            \
            "4:    pushl %%eax\n"            \
            "    xorl %%eax,%%eax\n"        \
            "    stosw\n"            \
            "    popl %%eax\n"            \
            "    addl $2,%0\n"            \
            "    jmp 2b\n"            \
            ".previous\n"                \
            ".section __ex_table,\"a\"\n"        \
            "    .align 4\n"            \
            "    .long 0b,3b\n"            \
            "    .long 1b,4b\n"            \
            ".previous"                \
            : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
            : "1"(from), "2"(to), "0"(size/4)    \
            : "memory");                \
        break;                        \
    case 3:                            \
        __asm__ __volatile__(                \
            "0:    rep; movsl\n"            \
            "1:    movsw\n"            \
            "2:    movsb\n"            \
            "3:\n"                    \
            ".section .fixup,\"ax\"\n"        \
            "4:    pushl %0\n"            \
            "    pushl %%eax\n"            \
            "    xorl %%eax,%%eax\n"        \
            "    rep; stosl\n"            \
            "    stosw\n"            \
            "    stosb\n"            \
            "    popl %%eax\n"            \
            "    popl %0\n"            \
            "    shl $2,%0\n"            \
            "    addl $3,%0\n"            \
            "    jmp 2b\n"            \
            "5:    pushl %%eax\n"            \
            "    xorl %%eax,%%eax\n"        \
            "    stosw\n"            \
            "    stosb\n"            \
            "    popl %%eax\n"            \
            "    addl $3,%0\n"            \
            "    jmp 2b\n"            \
            "6:    pushl %%eax\n"            \
            "    xorl %%eax,%%eax\n"        \
            "    stosb\n"            \
            "    popl %%eax\n"            \
            "    incl %0\n"            \
            "    jmp 2b\n"            \
            ".previous\n"                \
            ".section __ex_table,\"a\"\n"        \
            "    .align 4\n"            \
            "    .long 0b,4b\n"            \
            "    .long 1b,5b\n"            \
            "    .long 2b,6b\n"            \
            ".previous"                \
            : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
            : "1"(from), "2"(to), "0"(size/4)    \
            : "memory");                \
        break;                        \
    }                            \
} while (0)

unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);

static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
    if (access_ok(VERIFY_WRITE, to, n))
        __constant_copy_user(to,from,n);
    return n;
}

static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
    if (access_ok(VERIFY_READ, from, n))
        __constant_copy_user_zeroing(to,from,n);
    return n;
}

static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
    __constant_copy_user(to,from,n);
    return n;
}

static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
    __constant_copy_user_zeroing(to,from,n);
    return n;
}

#define copy_to_user(to,from,n)                \
    (__builtin_constant_p(n) ?            \
     __constant_copy_to_user((to),(from),(n)) :    \
     __generic_copy_to_user((to),(from),(n)))

#define copy_from_user(to,from,n)            \
    (__builtin_constant_p(n) ?            \
     __constant_copy_from_user((to),(from),(n)) :    \
     __generic_copy_from_user((to),(from),(n)))
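
/*
 * Example (sketch; the buffer names are illustrative): bulk transfers
 * check the returned "bytes not copied" count rather than a plain
 * error code:
 *
 *    char kbuf[64];
 *    if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *        return -EFAULT;
 *    if (copy_to_user(ubuf, kbuf, sizeof(kbuf)))
 *        return -EFAULT;
 */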

#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })

#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })

#define __copy_to_user(to,from,n)            \
    (__builtin_constant_p(n) ?            \
     __constant_copy_to_user_nocheck((to),(from),(n)) :    \
     __generic_copy_to_user_nocheck((to),(from),(n)))

#define __copy_from_user(to,from,n)            \
    (__builtin_constant_p(n) ?            \
     __constant_copy_from_user_nocheck((to),(from),(n)) :    \
     __generic_copy_from_user_nocheck((to),(from),(n)))

long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
long strlen_user(const char *str);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);

#endif /* __i386_UACCESS_H */